Import packages¶
In [1]:
import sys
print(sys.version)
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from rasterio.plot import show
from datetime import datetime, timedelta
import os
import pandas as pd
import rasterio
# Get today's date
today_date = datetime.today().strftime('%Y-%m-%d')
3.12.5 | packaged by Anaconda, Inc. | (main, Sep 12 2024, 18:18:29) [MSC v.1929 64 bit (AMD64)]
Import the tabular data¶
In [2]:
buffalo_id = 2005
n_samples = 10297 # 2005 has 10297 samples
# buffalo_id = 2018
# n_samples = 9440 # 2018 has 9440 samples
# Specify the path to your CSV file
csv_file_path = f'../buffalo_local_data_id/buffalo_{buffalo_id}_data_df_lag_1hr_n{n_samples}.csv'
# Read the CSV file into a DataFrame
buffalo_df = pd.read_csv(csv_file_path)
# Display the first few rows of the DataFrame
print(buffalo_df.head())
x_ y_ t_ id x1_ \
0 41969.310875 -1.435671e+06 2018-07-25T01:04:23Z 2005 41969.310875
1 41921.521939 -1.435654e+06 2018-07-25T02:04:39Z 2005 41921.521939
2 41779.439594 -1.435601e+06 2018-07-25T03:04:17Z 2005 41779.439594
3 41841.203272 -1.435635e+06 2018-07-25T04:04:39Z 2005 41841.203272
4 41655.463332 -1.435604e+06 2018-07-25T05:04:27Z 2005 41655.463332
y1_ x2_ y2_ x2_cent y2_cent ... \
0 -1.435671e+06 41921.521939 -1.435654e+06 -47.788936 16.857110 ...
1 -1.435654e+06 41779.439594 -1.435601e+06 -142.082345 53.568427 ...
2 -1.435601e+06 41841.203272 -1.435635e+06 61.763677 -34.322938 ...
3 -1.435635e+06 41655.463332 -1.435604e+06 -185.739939 31.003534 ...
4 -1.435604e+06 41618.651923 -1.435608e+06 -36.811409 -4.438037 ...
bearing bearing_sin bearing_cos ta cos_ta x_min \
0 2.802478 0.332652 -0.943050 1.367942 0.201466 40706.810875
1 2.781049 0.352783 -0.935705 -0.021429 0.999770 40659.021939
2 -0.507220 -0.485749 0.874098 2.994917 -0.989262 40516.939594
3 2.976198 0.164641 -0.986354 -2.799767 -0.942144 40578.703272
4 -3.021610 -0.119695 -0.992811 0.285377 0.959556 40392.963332
x_max y_min y_max ndvi_index
0 43231.810875 -1.436934e+06 -1.434409e+06 8
1 43184.021939 -1.436917e+06 -1.434392e+06 8
2 43041.939594 -1.436863e+06 -1.434338e+06 8
3 43103.703272 -1.436898e+06 -1.434373e+06 8
4 42917.963332 -1.436867e+06 -1.434342e+06 8
[5 rows x 32 columns]
Importing spatial data¶
Global layers¶
NDVI¶
In [3]:
# for monthly NDVI
file_path = '../mapping/cropped rasters/ndvi_monthly.tif'
# read the raster file
with rasterio.open(file_path) as src:
# Read the raster band as separate variable
ndvi_global = src.read([i for i in range(1, src.count + 1)])
# Get the metadata of the raster
ndvi_meta = src.meta
raster_transform = src.transform
# Print the metadata to check for time component
print("Metadata:", ndvi_meta)
# Check for specific time-related metadata
if 'TIFFTAG_DATETIME' in src.tags():
print("Time component found:", src.tags()['TIFFTAG_DATETIME'])
else:
print("No explicit time component found in metadata.")
# the rasters don't contain a time component
Metadata: {'driver': 'GTiff', 'dtype': 'float32', 'nodata': nan, 'width': 2400, 'height': 2280, 'count': 24, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
0.0, -25.0, -1406000.0)}
No explicit time component found in metadata.
In [4]:
print(ndvi_meta)
print(raster_transform)
print(ndvi_global.shape)
# Replace NaNs in the original array with -1, which represents water
ndvi_global = np.nan_to_num(ndvi_global, nan=-1.0)
# from the stack of local layers
ndvi_max = 0.8220
ndvi_min = -0.2772
ndvi_global_tens = torch.from_numpy(ndvi_global)
# Normalizing the data
ndvi_global_norm = (ndvi_global_tens - ndvi_min) / (ndvi_max - ndvi_min)
# plt.imshow(ndvi_global_norm.numpy())
# plt.colorbar()
# plt.show()
plt.imshow(ndvi_global_norm[1,:,:].numpy())
plt.colorbar()
plt.show()
plt.imshow(ndvi_global_norm[8,:,:].numpy())
plt.colorbar()
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': nan, 'width': 2400, 'height': 2280, 'count': 24, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
0.0, -25.0, -1406000.0)}
| 25.00, 0.00, 0.00|
| 0.00,-25.00,-1406000.00|
| 0.00, 0.00, 1.00|
(24, 2280, 2400)
Canopy cover¶
In [5]:
file_path = '../mapping/cropped rasters/canopy_cover.tif'
# read the raster file
with rasterio.open(file_path) as src:
# Read the raster band as separate variable
canopy_global = src.read(1)
# Get the metadata of the raster
canopy_meta = src.meta
In [6]:
print(canopy_meta)
print(canopy_global.shape)
# from the stack of local layers
canopy_max = 82.5000
canopy_min = 0.0
canopy_global_tens = torch.from_numpy(canopy_global)
# Normalizing the data
canopy_global_norm = (canopy_global_tens - canopy_min) / (canopy_max - canopy_min)
plt.imshow(canopy_global_norm.numpy())
plt.colorbar()
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': -3.3999999521443642e+38, 'width': 2400, 'height': 2280, 'count': 1, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
0.0, -25.0, -1406000.0)}
(2280, 2400)
Herbaceous vegetation¶
In [7]:
file_path = '../mapping/cropped rasters/veg_herby.tif'
# read the raster file
with rasterio.open(file_path) as src:
# Read the raster band as separate variable
herby_global = src.read(1)
# Get the metadata of the raster
herby_meta = src.meta
In [8]:
print(herby_meta)
print(herby_global.shape)
# from the stack of local layers
herby_max = 1.0
herby_min = 0.0
herby_global_tens = torch.from_numpy(herby_global)
# Normalizing the data
herby_global_norm = (herby_global_tens - herby_min) / (herby_max - herby_min)
plt.imshow(herby_global_norm.numpy())
plt.colorbar()
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': -3.3999999521443642e+38, 'width': 2400, 'height': 2280, 'count': 1, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
0.0, -25.0, -1406000.0)}
(2280, 2400)
Slope¶
In [9]:
file_path = '../mapping/cropped rasters/slope.tif'
# read the raster file
with rasterio.open(file_path) as src:
# Read the raster band as separate variable
slope_global = src.read(1)
# Get the metadata of the raster
slope_meta = src.meta
slope_transform = src.transform # same as the raster transform in the NDVI raster read
print(slope_global)
print(slope_transform)
[[ nan nan nan ... nan nan nan] [ nan 0.3352837 0.39781624 ... 0. 0. nan] [ nan 0.3983888 0.48142004 ... 0. 0. nan] ... [ nan 2.215875 1.9798415 ... 1.5078747 1.268342 nan] [ nan 1.9740707 1.7354656 ... 1.697194 1.4880029 nan] [ nan nan nan ... nan nan nan]] | 25.00, 0.00, 0.00| | 0.00,-25.00,-1406000.00| | 0.00, 0.00, 1.00|
In [10]:
print(slope_meta)
print(slope_global.shape)
# Replace NaNs in the original array with -1, which represents water
slope_global = np.nan_to_num(slope_global, nan=0.0)
# from the stack of local layers
slope_max = 12.2981
slope_min = 0.0006
slope_global_tens = torch.from_numpy(slope_global)
# Normalizing the data
slope_global_norm = (slope_global_tens - slope_min) / (slope_max - slope_min)
plt.imshow(slope_global_norm.numpy())
plt.colorbar()
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': nan, 'width': 2400, 'height': 2280, 'count': 1, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
0.0, -25.0, -1406000.0)}
(2280, 2400)
Testing functions for selecting subsets of the raster layer, using torch objects
In [11]:
# Create a figure and axis with matplotlib
fig, ax = plt.subplots(figsize=(7.5, 7.5))
# Plot the raster
show(slope_global, transform=raster_transform, ax=ax, cmap='viridis')
# Set the title and labels
ax.set_title('Raster with Geographic Coordinates')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
# Show the plot
plt.show()
In [12]:
x, y = 5.9e4, -1.447e6
print(x, y)
# Convert geographic coordinates to pixel coordinates
px, py = ~raster_transform * (x, y)
# Round pixel coordinates to integers
px, py = int(round(px)), int(round(py))
# Print the pixel coordinates
print(px, py)
59000.0 -1447000.0 2360 1640
In [13]:
window_size = 101
# Define half the window size
half_window = window_size // 2
# Calculate the window boundaries
row_start = py - half_window
row_stop = py + half_window + 1
col_start = px - half_window
col_stop = px + half_window + 1
# Initialize the subset array with zeros (or any other padding value)
subset = np.zeros((window_size, window_size), dtype=slope_global.dtype)
# Calculate the valid region within the raster bounds
valid_row_start = max(0, row_start)
valid_row_stop = min(slope_global.shape[0], row_stop)
valid_col_start = max(0, col_start)
valid_col_stop = min(slope_global.shape[1], col_stop)
# Calculate the corresponding region in the subset array
subset_row_start = valid_row_start - row_start
subset_row_stop = subset_row_start + (valid_row_stop - valid_row_start)
subset_col_start = valid_col_start - col_start
subset_col_stop = subset_col_start + (valid_col_stop - valid_col_start)
# Copy the valid region from the raster array to the subset array
subset[subset_row_start:subset_row_stop, subset_col_start:subset_col_stop] = \
slope_global[valid_row_start:valid_row_stop, valid_col_start:valid_col_stop]
Plot the raster layer
In [14]:
# plot the subset
plt.imshow(subset, cmap='viridis')
plt.colorbar()
plt.show()
Subset function¶
In [15]:
def subset_raster_with_padding_torch(raster_tensor, x, y, window_size, transform):
# Convert geographic coordinates to pixel coordinates
px, py = ~transform * (x, y)
# Round pixel coordinates to integers
px, py = int(round(px)), int(round(py))
# Define half the window size
half_window = window_size // 2
# Calculate the window boundaries
row_start = py - half_window
row_stop = py + half_window + 1
col_start = px - half_window
col_stop = px + half_window + 1
# Initialize the subset tensor with zeros (or any other padding value)
subset = torch.full((window_size, window_size), -1.0, dtype=raster_tensor.dtype)
# Calculate the valid region within the raster bounds
valid_row_start = max(0, row_start)
valid_row_stop = min(raster_tensor.shape[0], row_stop)
valid_col_start = max(0, col_start)
valid_col_stop = min(raster_tensor.shape[1], col_stop)
# Calculate the corresponding region in the subset tensor
subset_row_start = valid_row_start - row_start
subset_row_stop = subset_row_start + (valid_row_stop - valid_row_start)
subset_col_start = valid_col_start - col_start
subset_col_stop = subset_col_start + (valid_col_stop - valid_col_start)
# Copy the valid region from the raster tensor to the subset tensor
subset[subset_row_start:subset_row_stop, subset_col_start:subset_col_stop] = \
raster_tensor[valid_row_start:valid_row_stop, valid_col_start:valid_col_stop]
return subset, col_start, row_start
Testing the subset function
In [16]:
x = 5.9e4
y = -1.41e6
window_size = 101
which_ndvi = 1
ndvi_subset, origin_x, origin_y = subset_raster_with_padding_torch(ndvi_global_norm[which_ndvi,:,:], x, y, window_size, raster_transform)
canopy_subset, origin_x, origin_y = subset_raster_with_padding_torch(canopy_global_norm, x, y, window_size, raster_transform)
herby_subset, origin_x, origin_y = subset_raster_with_padding_torch(herby_global_norm, x, y, window_size, raster_transform)
slope_subset, origin_x, origin_y = subset_raster_with_padding_torch(slope_global_norm, x, y, window_size, raster_transform)
# Plot the subset
fig, axs = plt.subplots(2, 2, figsize=(10, 10))
axs[0, 0].imshow(ndvi_subset.numpy(), cmap='viridis')
axs[0, 0].set_title('NDVI Subset')
# axs[0, 0].axis('off')
axs[0, 1].imshow(canopy_subset.numpy(), cmap='viridis')
axs[0, 1].set_title('Canopy Cover Subset')
# axs[0, 1].axis('off')
axs[1, 0].imshow(herby_subset.numpy(), cmap='viridis')
axs[1, 0].set_title('Herbaceous Vegetation Subset')
# axs[1, 0].axis('off')
axs[1, 1].imshow(slope_subset.numpy(), cmap='viridis')
axs[1, 1].set_title('Slope Subset')
# axs[1, 1].axis('off')
Out[16]:
Text(0.5, 1.0, 'Slope Subset')
In [17]:
x = 3.7e4
y = -1.435e6
print(x, y)
# Convert geographic coordinates to pixel coordinates
px, py = ~raster_transform * (x, y)
# Round pixel coordinates to integers
px, py = int(round(px)), int(round(py))
# Print the pixel coordinates
print(px, py)
37000.0 -1435000.0 1480 1160
Running the model on the subset layers¶
Set the device for the model¶
In [18]:
# train on the GPU or on the CPU, if a GPU is not available
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f"Using {device} device")
Using cpu device
Define the model¶
In [19]:
class Conv2d_block_toFC(nn.Module):
def __init__(self, params):
super(Conv2d_block_toFC, self).__init__()
self.batch_size = params.batch_size
self.input_channels = params.input_channels
self.output_channels = params.output_channels
self.kernel_size = params.kernel_size
self.stride = params.stride
self.kernel_size_mp = params.kernel_size_mp
self.stride_mp = params.stride_mp
self.padding = params.padding
self.image_dim = params.image_dim
self.device = params.device
self.conv2d = nn.Sequential(
nn.Conv2d(in_channels=self.input_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
nn.ReLU(),
nn.MaxPool2d(kernel_size=self.kernel_size_mp, stride=self.stride_mp),
nn.Conv2d(in_channels=self.output_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
nn.ReLU(),
nn.MaxPool2d(kernel_size=self.kernel_size_mp, stride=self.stride_mp),
nn.Flatten())
def forward(self, x):
return self.conv2d(x)
class Conv2d_block_spatial(nn.Module):
def __init__(self, params):
super(Conv2d_block_spatial, self).__init__()
self.batch_size = params.batch_size
self.input_channels = params.input_channels
self.output_channels = params.output_channels
self.kernel_size = params.kernel_size
self.stride = params.stride
# self.kernel_size_mp = params.kernel_size_mp
# self.stride_mp = params.stride_mp
self.padding = params.padding
self.image_dim = params.image_dim
self.device = params.device
self.conv2d = nn.Sequential(
nn.Conv2d(in_channels=self.input_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
nn.ReLU(),
nn.Conv2d(in_channels=self.output_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
nn.ReLU(),
nn.Conv2d(in_channels=self.output_channels, out_channels=1, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding)
)
def forward(self, x):
conv2d_spatial = self.conv2d(x).squeeze(dim = 1)
conv2d_spatial = conv2d_spatial - torch.logsumexp(conv2d_spatial, dim = (1, 2), keepdim = True)
return conv2d_spatial
class FCN_block_all_habitat(nn.Module):
def __init__(self, params):
super(FCN_block_all_habitat, self).__init__()
self.batch_size = params.batch_size
self.dense_dim_in_all = params.dense_dim_in_all
self.dense_dim_hidden = params.dense_dim_hidden
self.dense_dim_out = params.dense_dim_out
self.image_dim = params.image_dim
self.device = params.device
self.dropout = params.dropout
self.ffn = nn.Sequential(
nn.Linear(self.dense_dim_in_all, self.dense_dim_hidden),
nn.Dropout(self.dropout),
nn.ReLU(),
nn.Linear(self.dense_dim_hidden, self.dense_dim_hidden),
nn.Dropout(self.dropout),
nn.ReLU(),
nn.Linear(self.dense_dim_hidden, self.image_dim * self.image_dim)
)
def forward(self, x):
return self.ffn(x)
class FCN_block_all_movement(nn.Module):
def __init__(self, params):
super(FCN_block_all_movement, self).__init__()
self.batch_size = params.batch_size
self.dense_dim_in_all = params.dense_dim_in_all
self.dense_dim_hidden = params.dense_dim_hidden
self.dense_dim_out = params.dense_dim_out
self.image_dim = params.image_dim
self.device = params.device
self.num_movement_params = params.num_movement_params
self.dropout = params.dropout
self.ffn = nn.Sequential(
nn.Linear(self.dense_dim_in_all, self.dense_dim_hidden),
nn.Dropout(self.dropout),
nn.ReLU(),
nn.Linear(self.dense_dim_hidden, self.dense_dim_hidden),
nn.Dropout(self.dropout),
nn.ReLU(),
nn.Linear(self.dense_dim_hidden, self.num_movement_params)
)
def forward(self, x):
return self.ffn(x)
##################################################
# Mixture of two Gamma and von Mises distributions with the von Mises mu parameters allowed to vary
##################################################
class Params_to_Grid_Block(nn.Module):
def __init__(self, params):
super(Params_to_Grid_Block, self).__init__()
self.batch_size = params.batch_size
self.image_dim = params.image_dim
self.pixel_size = params.pixel_size
self.center = self.image_dim // 2
y, x = np.indices((self.image_dim, self.image_dim))
self.distance_layer = torch.from_numpy(np.sqrt((self.pixel_size*(x - self.center))**2 + (self.pixel_size*(y - self.center))**2))
# change the centre cell to the average distance from the centre to the edge of the pixel
self.distance_layer[self.center, self.center] = 0.56*self.pixel_size # average distance from the centre to the perimeter of the pixel (accounting for longer distances at the corners)
# self.bearing_layer = torch.from_numpy(np.arctan2(y - self.center, x - self.center))
self.bearing_layer = torch.from_numpy(np.arctan2(self.center - y, x - self.center))
self.device = params.device
# Gamma desnities for the mixture distribution
def gamma_density(self, x, shape, scale):
# Ensure all tensors are on the same device as x
shape = shape.to(x.device)
scale = scale.to(x.device)
return -1*torch.lgamma(shape) -shape*torch.log(scale) + (shape - 1)*torch.log(x) - x/scale
# von Mises densities for the mixture distribution
def vonmises_density(self, x, kappa, vm_mu):
# Ensure all tensors are on the same device as x
kappa = kappa.to(x.device)
vm_mu = vm_mu.to(x.device)
return kappa*torch.cos(x - vm_mu) - 1*(np.log(2*torch.pi) + torch.log(torch.special.i0(kappa)))
def forward(self, x, bearing):
# parameters of the first mixture distribution
gamma_shape1 = torch.exp(x[:, 0]).unsqueeze(0).unsqueeze(0)
gamma_shape1 = gamma_shape1.repeat(self.image_dim, self.image_dim, 1)
gamma_shape1 = gamma_shape1.permute(2, 0, 1)
gamma_scale1 = torch.exp(x[:, 1]).unsqueeze(0).unsqueeze(0)
gamma_scale1 = gamma_scale1.repeat(self.image_dim, self.image_dim, 1)
gamma_scale1 = gamma_scale1.permute(2, 0, 1)
gamma_weight1 = torch.exp(x[:, 2]).unsqueeze(0).unsqueeze(0)
gamma_weight1 = gamma_weight1.repeat(self.image_dim, self.image_dim, 1)
gamma_weight1 = gamma_weight1.permute(2, 0, 1)
# parameters of the second mixture distribution
gamma_shape2 = torch.exp(x[:, 3]).unsqueeze(0).unsqueeze(0)
gamma_shape2 = gamma_shape2.repeat(self.image_dim, self.image_dim, 1)
gamma_shape2 = gamma_shape2.permute(2, 0, 1)
gamma_scale2 = torch.exp(x[:, 4]).unsqueeze(0).unsqueeze(0)
gamma_scale2 = gamma_scale2 * 500 ### to transform the scale parameter to be near 1
gamma_scale2 = gamma_scale2.repeat(self.image_dim, self.image_dim, 1)
gamma_scale2 = gamma_scale2.permute(2, 0, 1)
gamma_weight2 = torch.exp(x[:, 5]).unsqueeze(0).unsqueeze(0)
gamma_weight2 = gamma_weight2.repeat(self.image_dim, self.image_dim, 1)
gamma_weight2 = gamma_weight2.permute(2, 0, 1)
# Apply softmax to the weights
gamma_weights = torch.stack([gamma_weight1, gamma_weight2], dim=0)
gamma_weights = torch.nn.functional.softmax(gamma_weights, dim=0)
gamma_weight1 = gamma_weights[0]
gamma_weight2 = gamma_weights[1]
# calculation of Gamma densities
gamma_density_layer1 = self.gamma_density(self.distance_layer, gamma_shape1, gamma_scale1).to(device)
gamma_density_layer2 = self.gamma_density(self.distance_layer, gamma_shape2, gamma_scale2).to(device)
# combining both densities to create a mixture distribution using logsumexp
logsumexp_gamma_corr = torch.max(gamma_density_layer1, gamma_density_layer2)
gamma_density_layer = logsumexp_gamma_corr + torch.log(gamma_weight1 * torch.exp(gamma_density_layer1 - logsumexp_gamma_corr) + gamma_weight2 * torch.exp(gamma_density_layer2 - logsumexp_gamma_corr))
## Von Mises Distributions
# calculate the new bearing from the turning angle
# takes in the bearing from the previous step and adds the turning angle
bearing_new1 = x[:, 6] + bearing[:, 0]
# the new bearing becomes the mean of the von Mises distribution
# the estimated parameter [x:, 6] is the turning angle of the next step
# which is always in reference to the input bearing
vonmises_mu1 = bearing_new1.unsqueeze(0).unsqueeze(0)
vonmises_mu1 = vonmises_mu1.repeat(self.image_dim, self.image_dim, 1)
vonmises_mu1 = vonmises_mu1.permute(2, 0, 1)
# parameters of the first von Mises distribution
vonmises_kappa1 = torch.exp(x[:, 7]).unsqueeze(0).unsqueeze(0)
vonmises_kappa1 = vonmises_kappa1.repeat(self.image_dim, self.image_dim, 1)
vonmises_kappa1 = vonmises_kappa1.permute(2, 0, 1)
vonmises_weight1 = torch.exp(x[:, 8]).unsqueeze(0).unsqueeze(0)
vonmises_weight1 = vonmises_weight1.repeat(self.image_dim, self.image_dim, 1)
vonmises_weight1 = vonmises_weight1.permute(2, 0, 1)
# vm_mu and weight for the second von Mises distribution
bearing_new2 = x[:, 9] + bearing[:, 0]
vonmises_mu2 = bearing_new2.unsqueeze(0).unsqueeze(0)
vonmises_mu2 = vonmises_mu2.repeat(self.image_dim, self.image_dim, 1)
vonmises_mu2 = vonmises_mu2.permute(2, 0, 1)
# parameters of the second von Mises distribution
vonmises_kappa2 = torch.exp(x[:, 10]).unsqueeze(0).unsqueeze(0)
vonmises_kappa2 = vonmises_kappa2.repeat(self.image_dim, self.image_dim, 1)
vonmises_kappa2 = vonmises_kappa2.permute(2, 0, 1)
vonmises_weight2 = torch.exp(x[:, 11]).unsqueeze(0).unsqueeze(0)
vonmises_weight2 = vonmises_weight2.repeat(self.image_dim, self.image_dim, 1)
vonmises_weight2 = vonmises_weight2.permute(2, 0, 1)
# Apply softmax to the weights
vonmises_weights = torch.stack([vonmises_weight1, vonmises_weight2], dim=0)
vonmises_weights = torch.nn.functional.softmax(vonmises_weights, dim=0)
vonmises_weight1 = vonmises_weights[0]
vonmises_weight2 = vonmises_weights[1]
# calculation of von Mises densities
vonmises_density_layer1 = self.vonmises_density(self.bearing_layer, vonmises_kappa1, vonmises_mu1).to(device)
vonmises_density_layer2 = self.vonmises_density(self.bearing_layer, vonmises_kappa2, vonmises_mu2).to(device)
# combining both densities to create a mixture distribution using the logsumexp trick
logsumexp_vm_corr = torch.max(vonmises_density_layer1, vonmises_density_layer2)
vonmises_density_layer = logsumexp_vm_corr + torch.log(vonmises_weight1 * torch.exp(vonmises_density_layer1 - logsumexp_vm_corr) + vonmises_weight2 * torch.exp(vonmises_density_layer2 - logsumexp_vm_corr))
# combining the two distributions
movement_grid = gamma_density_layer + vonmises_density_layer # Gamma and von Mises densities are on the log-scale
# normalise the movement predictions
movement_grid = movement_grid - torch.logsumexp(movement_grid, dim = (1, 2), keepdim = True)
return movement_grid
class Scalar_to_Grid_Block(nn.Module):
def __init__(self, params):
super(Scalar_to_Grid_Block, self).__init__()
self.batch_size = params.batch_size
self.image_dim = params.image_dim
self.device = params.device
def forward(self, x):
num_scalars = x.shape[1]
scalar_map = x.view(x.shape[0], num_scalars, 1, 1).expand(x.shape[0], num_scalars, self.image_dim, self.image_dim)
return scalar_map
class ConvJointModel(nn.Module):
def __init__(self, params):
super(ConvJointModel, self).__init__()
self.scalar_grid_output = Scalar_to_Grid_Block(params)
self.conv_habitat = Conv2d_block_spatial(params)
self.conv_movement = Conv2d_block_toFC(params)
self.fcn_movement_all = FCN_block_all_movement(params)
self.movement_grid_output = Params_to_Grid_Block(params)
self.device = params.device
def forward(self, x):
spatial_data_x = x[0]
scalars_to_grid = x[1]
bearing_x = x[2]
# SCALAR GRIDS
scalar_grids = self.scalar_grid_output(scalars_to_grid)
all_spatial = torch.cat([spatial_data_x, scalar_grids], dim = 1)
# print(f"Shape after scalar grid: {all_spatial.shape}") # Debugging print
# HABITAT SELECTION
output_habitat = self.conv_habitat(all_spatial)
# print(f"Shape after CNN habitat: {output_habitat.shape}") # Debugging print
# MOVEMENT
conv_movement = self.conv_movement(all_spatial)
# print(f"Shape after CNN to FC movement: {conv_movement.shape}") # Debugging print
output_movement = self.fcn_movement_all(conv_movement)
# print(f"Shape after fcn_movement_all: {output_movement.shape}") # Debugging print
output_movement = self.movement_grid_output(output_movement, bearing_x)
# print(f"Shape after CNN movement: {output_movement.shape}") # Debugging print
# combine the habitat and movement predictions
output = torch.stack((output_habitat, output_movement), dim = -1)
return output
class ModelParams():
def __init__(self, dict_params):
self.batch_size = dict_params["batch_size"]
self.image_dim = dict_params["image_dim"]
self.pixel_size = dict_params["pixel_size"]
self.batch_size = dict_params["batch_size"]
self.dim_in_nonspatial_to_grid = dict_params["dim_in_nonspatial_to_grid"]
self.dense_dim_in_nonspatial = dict_params["dense_dim_in_nonspatial"]
self.dense_dim_hidden = dict_params["dense_dim_hidden"]
self.dense_dim_out = dict_params["dense_dim_out"]
self.batch_size = dict_params["batch_size"]
self.dense_dim_in_all = dict_params["dense_dim_in_all"]
self.dense_dim_hidden = dict_params["dense_dim_hidden"]
self.dense_dim_out = dict_params["dense_dim_out"]
self.batch_size = dict_params["batch_size"]
self.input_channels = dict_params["input_channels"]
self.output_channels = dict_params["output_channels"]
self.kernel_size = dict_params["kernel_size"]
self.stride = dict_params["stride"]
self.kernel_size_mp = dict_params["kernel_size_mp"]
self.stride_mp = dict_params["stride_mp"]
self.padding = dict_params["padding"]
self.image_dim = dict_params["image_dim"]
self.num_movement_params = dict_params["num_movement_params"]
self.dropout = dict_params["dropout"]
self.device = dict_params["device"]
Instantiate the model¶
In [20]:
params_dict = {"batch_size": 32,
"image_dim": 101, #number of pixels along the edge of each local patch/image
"pixel_size": 25, #number of metres along the edge of a pixel
"dim_in_nonspatial_to_grid": 4, #the number of scalar predictors that are converted to a grid and appended to the spatial features
"dense_dim_in_nonspatial": 4, #change this to however many other scalar predictors you have (bearing, velocity etc)
"dense_dim_hidden": 128, #number of nodes in the hidden layers
"dense_dim_out": 128, #number of nodes in the output of the fully connected block (FCN)
"dense_dim_in_all": 2500,# + 128, #number of inputs entering the fully connected block once the nonspatial features have been concatenated to the spatial features
"input_channels": 4 + 4, #number of spatial layers in each image + number of scalar layers that are converted to a grid
"output_channels": 4, #number of filters to learn
"kernel_size": 3, #the size of the 2D moving windows / kernels that are being learned
"stride": 1, #the stride used when applying the kernel. This reduces the dimension of the output if set to greater than 1
"kernel_size_mp": 2, #the size of the kernel that is used in max pooling operations
"stride_mp": 2, #the stride that is used in max pooling operations
"padding": 1, #the amount of padding to apply to images prior to applying the 2D convolution
"num_movement_params": 12, #number of parameters used to parameterise the movement kernel
"dropout": 0.1,
"device": device
}
params = ModelParams(params_dict)
model = ConvJointModel(params).to(device)
# print(model)
Load the model structure¶
In [21]:
# date of the trained model checkpoint
date = '20240913'
# # load the model weights
# print(model.state_dict())
model.load_state_dict(torch.load(f'model_checkpoints/checkpoint_CNN_global_buffalo{buffalo_id}_TAmix_bearing-rev_adj_hab-covs_grid-only_{date}.pt', map_location=torch.device('cpu')))
print(model.state_dict())
model.eval()
OrderedDict({'conv_habitat.conv2d.0.weight': tensor([[[[-0.0368, -0.0134, -0.0739],
[ 0.0177, 0.0321, 0.0695],
[ 0.0172, 0.1060, 0.0896]],
[[ 0.0051, -0.0326, -0.0400],
[ 0.0039, -0.0624, -0.0210],
[-0.0321, 0.0741, 0.0164]],
[[ 0.0699, 0.1295, 0.1569],
[ 0.0085, 0.0970, 0.2385],
[ 0.0476, 0.0005, 0.1006]],
[[ 0.0427, 0.0132, 0.1719],
[ 0.1421, 0.1424, -0.0366],
[ 0.0328, 0.1222, 0.1507]],
[[-0.0751, 0.0414, -0.0240],
[-0.0313, -0.0388, -0.0222],
[ 0.0988, -0.0882, 0.0178]],
[[-0.0320, -0.0515, -0.0369],
[-0.0992, -0.0287, -0.0692],
[-0.0535, -0.0131, -0.0984]],
[[ 0.0766, 0.0536, 0.1235],
[ 0.0686, 0.1765, -0.0189],
[ 0.1031, 0.1593, 0.1923]],
[[-0.0793, 0.0530, 0.0059],
[ 0.0408, 0.0666, 0.0606],
[-0.0086, -0.0875, -0.0296]]],
[[[ 0.1216, 0.1125, -0.0479],
[ 0.1566, 0.1483, 0.0746],
[ 0.0624, 0.1083, 0.1018]],
[[ 0.2009, 0.1701, -0.0631],
[ 0.0123, 0.1958, -0.0802],
[ 0.1127, 0.1607, 0.0322]],
[[-0.1107, -0.0751, 0.0580],
[ 0.0263, 0.1149, 0.0480],
[ 0.0103, 0.0434, 0.0534]],
[[ 0.1855, 0.1716, 0.2361],
[ 0.2497, 0.3335, 0.3186],
[ 0.2981, 0.2336, 0.2098]],
[[-0.0435, 0.0425, -0.0904],
[-0.0885, -0.0642, 0.0095],
[ 0.0147, -0.0451, 0.0461]],
[[ 0.1278, 0.0076, 0.0850],
[ 0.0264, 0.0831, 0.1597],
[ 0.1512, -0.0292, 0.0377]],
[[-0.1170, -0.1101, -0.1151],
[-0.0725, -0.0327, 0.0401],
[ 0.0347, 0.0455, -0.1245]],
[[-0.0451, 0.0714, -0.0642],
[ 0.0596, 0.0395, 0.0036],
[ 0.0164, 0.0020, -0.0514]]],
[[[ 0.0939, -0.0430, 0.1582],
[ 0.1726, -0.0604, 0.1436],
[ 0.0586, 0.0081, 0.1139]],
[[-0.1596, -0.1508, 0.0819],
[-0.0461, -0.0364, 0.0650],
[-0.0433, -0.0888, -0.0432]],
[[-0.0325, 0.0398, 0.0343],
[-0.0745, -0.0852, 0.0427],
[ 0.1780, 0.1101, 0.0441]],
[[-0.0890, 0.0241, -0.0272],
[ 0.0570, -0.0529, 0.1088],
[-0.0010, 0.0050, 0.1367]],
[[-0.0978, 0.0085, -0.0905],
[ 0.0651, -0.1181, 0.0384],
[ 0.0068, 0.0771, -0.0469]],
[[-0.1132, -0.0260, -0.0629],
[ 0.0575, 0.0952, 0.0885],
[ 0.0652, -0.0856, -0.1459]],
[[-0.2363, -0.2128, -0.2295],
[-0.1884, -0.0381, -0.0636],
[-0.0939, -0.1870, -0.1678]],
[[ 0.1351, 0.0708, 0.0154],
[ 0.0991, 0.0522, 0.1423],
[ 0.1285, 0.0776, 0.0231]]],
[[[-0.0491, -0.1152, 0.1313],
[ 0.1094, -0.1041, 0.0582],
[ 0.2285, -0.0112, 0.2129]],
[[-0.1088, 0.0420, -0.1128],
[-0.0531, -0.0984, -0.0498],
[-0.0638, 0.1497, 0.1210]],
[[ 0.1637, -0.0286, 0.0726],
[ 0.0336, -0.0434, -0.0152],
[ 0.0022, 0.0014, -0.0785]],
[[-0.0120, -0.0679, -0.0068],
[-0.0594, 0.0857, 0.0921],
[-0.0251, 0.0546, 0.1210]],
[[ 0.0821, 0.0180, -0.0463],
[-0.0644, -0.0123, 0.0125],
[-0.0926, 0.1189, 0.0042]],
[[ 0.3123, 0.0635, 0.2937],
[ 0.2070, 0.2160, 0.2558],
[ 0.2164, 0.2365, 0.2375]],
[[ 0.1169, 0.1590, -0.0132],
[ 0.0465, 0.0732, -0.0290],
[ 0.0964, 0.1595, 0.0014]],
[[-0.0937, -0.1444, -0.0637],
[-0.1498, -0.1243, -0.0603],
[-0.0298, -0.0139, -0.0264]]]]), 'conv_habitat.conv2d.0.bias': tensor([ 0.0429, -0.0627, 0.1203, 0.2632]), 'conv_habitat.conv2d.2.weight': tensor([[[[ 0.0207, -0.2585, -0.0831],
[-0.1043, -0.2646, -0.2050],
[-0.1125, -0.2740, -0.2659]],
[[ 0.1162, -0.1531, -0.0120],
[ 0.1499, -0.0533, -0.0351],
[-0.0381, 0.1768, 0.2326]],
[[ 0.2421, -0.0356, 0.1410],
[ 0.0513, 0.0544, 0.1952],
[ 0.2779, 0.1041, 0.2545]],
[[ 0.2693, 0.1320, 0.2339],
[ 0.2049, 0.0584, -0.0134],
[ 0.1758, 0.0016, 0.1716]]],
[[[-0.0058, 0.0893, 0.0199],
[ 0.1267, -0.0906, 0.0823],
[-0.1315, 0.0060, -0.1377]],
[[ 0.1165, 0.1294, 0.1599],
[-0.0788, 0.2204, 0.1045],
[ 0.0019, 0.2411, 0.1404]],
[[-0.1782, -0.1802, -0.0920],
[-0.2061, -0.1698, -0.0354],
[ 0.0525, 0.0628, -0.2051]],
[[-0.1547, 0.0495, -0.1781],
[ 0.1444, 0.0394, -0.1355],
[-0.1612, 0.1408, -0.2080]]],
[[[-0.0155, -0.0560, 0.0131],
[ 0.1160, -0.0757, 0.0062],
[-0.1077, 0.0124, -0.1247]],
[[ 0.0634, -0.1625, -0.0854],
[-0.0291, 0.0443, -0.2483],
[ 0.0192, -0.1055, 0.1636]],
[[ 0.0608, 0.2450, 0.2833],
[ 0.1122, 0.1202, 0.0540],
[ 0.1374, 0.3025, 0.3636]],
[[ 0.2703, 0.0808, 0.3889],
[ 0.2558, 0.0457, 0.0537],
[ 0.2008, 0.2397, 0.3383]]],
[[[-0.0428, 0.0826, 0.1546],
[ 0.1523, 0.0344, -0.1603],
[ 0.0872, -0.1652, -0.0859]],
[[-0.0991, -0.0728, 0.2356],
[ 0.1718, 0.2120, 0.1634],
[-0.1062, -0.1227, 0.0533]],
[[ 0.0755, -0.1185, 0.0811],
[-0.1503, 0.1099, -0.0596],
[-0.1652, -0.0534, 0.0519]],
[[-0.1628, 0.1281, 0.0401],
[ 0.0943, 0.0178, 0.1172],
[-0.2255, -0.0116, -0.1365]]]]), 'conv_habitat.conv2d.2.bias': tensor([ 0.1559, 0.0808, 0.1760, -0.1579]), 'conv_habitat.conv2d.4.weight': tensor([[[[ 0.1375, 0.2710, 0.2994],
[ 0.3686, 0.0485, 0.1733],
[ 0.3552, 0.3868, 0.3075]],
[[-0.0089, -0.2148, -0.1354],
[-0.1949, -0.1225, -0.3696],
[-0.0984, -0.0811, -0.2898]],
[[ 0.3479, 0.1998, 0.2570],
[ 0.0434, 0.1106, 0.1740],
[ 0.2669, 0.0855, 0.4095]],
[[-0.3457, -0.1092, -0.0394],
[-0.3023, -0.1219, -0.2417],
[-0.3328, -0.3692, -0.1664]]]]), 'conv_habitat.conv2d.4.bias': tensor([-0.1230]), 'conv_movement.conv2d.0.weight': tensor([[[[ 0.0510, 0.0049, 0.1684],
[ 0.0406, 0.0835, -0.0563],
[ 0.0850, 0.0980, -0.0350]],
[[ 0.0704, 0.0339, 0.0398],
[-0.0266, -0.0028, -0.0556],
[ 0.0547, -0.0490, 0.0089]],
[[ 0.0334, -0.0113, -0.1390],
[ 0.0126, -0.1331, -0.0026],
[ 0.0179, -0.0944, -0.1123]],
[[-0.0773, 0.0804, 0.0500],
[-0.0278, -0.0353, 0.0652],
[ 0.0625, -0.0603, -0.0527]],
[[ 0.1257, 0.0407, 0.1107],
[ 0.0525, -0.0287, -0.0031],
[ 0.0249, 0.0032, -0.0020]],
[[ 0.1255, 0.0249, 0.0642],
[ 0.1016, 0.0449, -0.0163],
[ 0.1492, 0.1522, 0.0948]],
[[-0.1349, 0.0244, 0.0190],
[-0.0308, 0.0322, 0.0181],
[ 0.0832, 0.0460, 0.0516]],
[[ 0.0340, -0.1111, 0.0014],
[ 0.0170, 0.0822, -0.0927],
[ 0.0824, -0.0051, 0.0872]]],
[[[-0.0267, 0.0702, 0.0154],
[-0.0729, -0.1086, 0.0556],
[-0.1096, -0.1025, 0.0588]],
[[-0.0605, -0.1014, 0.0546],
[ 0.0671, -0.1323, 0.0488],
[-0.0019, -0.1075, -0.0090]],
[[-0.0540, -0.0557, -0.0118],
[ 0.0544, 0.0341, 0.0051],
[ 0.0583, -0.0737, -0.0674]],
[[-0.0714, 0.0877, 0.0077],
[ 0.1170, -0.0983, -0.0973],
[ 0.0304, 0.0322, 0.0311]],
[[ 0.0291, -0.0036, -0.0871],
[-0.1134, 0.0539, 0.0185],
[-0.0543, -0.0981, 0.0982]],
[[ 0.0279, -0.0205, -0.0973],
[-0.1469, -0.0555, 0.0179],
[-0.1105, -0.1721, -0.0531]],
[[-0.0550, -0.0026, 0.0419],
[-0.1221, 0.0251, -0.0560],
[ 0.0735, 0.0812, 0.0457]],
[[ 0.0998, 0.0213, -0.0192],
[-0.0924, 0.0830, -0.0559],
[ 0.0595, -0.0648, -0.0440]]],
[[[ 0.0556, -0.0207, 0.0812],
[-0.0412, 0.0794, -0.0905],
[ 0.1074, -0.0315, -0.0887]],
[[-0.0244, -0.0407, -0.1010],
[-0.0624, 0.1011, 0.0181],
[-0.0916, 0.1115, -0.0763]],
[[ 0.0204, 0.0656, 0.0214],
[-0.0883, -0.0372, 0.1039],
[-0.0625, 0.0573, -0.0115]],
[[ 0.1088, 0.1076, -0.0519],
[ 0.1032, 0.0392, 0.0678],
[ 0.0478, -0.0060, -0.0363]],
[[-0.1069, 0.0169, 0.1061],
[ 0.0612, -0.0027, 0.0299],
[-0.0567, -0.0626, -0.0021]],
[[-0.0960, 0.1309, -0.0396],
[-0.0612, 0.0105, 0.1333],
[-0.0602, 0.0955, 0.1173]],
[[-0.0446, -0.1051, -0.1028],
[ 0.0779, -0.0449, -0.0641],
[-0.0166, -0.0010, 0.0820]],
[[-0.0482, -0.0322, 0.0546],
[ 0.0348, -0.0517, -0.0218],
[ 0.0307, -0.0426, 0.0084]]],
[[[-0.0941, 0.0981, -0.0359],
[-0.0740, -0.0615, 0.0974],
[ 0.0319, 0.0195, -0.0118]],
[[-0.0461, 0.0975, -0.0212],
[ 0.1396, 0.0681, 0.0824],
[ 0.0235, 0.1458, 0.0637]],
[[-0.1000, 0.0101, 0.0652],
[-0.0580, 0.0784, 0.0608],
[ 0.0857, 0.0398, -0.0321]],
[[-0.0739, -0.0121, -0.0785],
[-0.1053, 0.0169, 0.0978],
[ 0.0973, -0.0775, 0.0589]],
[[ 0.1033, 0.0471, 0.0441],
[ 0.0374, 0.0223, -0.0452],
[ 0.0508, -0.1083, 0.0375]],
[[-0.0686, -0.0630, -0.0308],
[-0.0817, -0.0759, 0.0812],
[-0.0274, 0.0146, 0.0606]],
[[ 0.0633, -0.0179, 0.0921],
[-0.0132, -0.0032, 0.0731],
[-0.0133, -0.0096, 0.0954]],
[[ 0.0257, 0.0301, -0.0878],
[-0.0374, 0.0105, -0.0564],
[ 0.0760, 0.0858, 0.0041]]]]), 'conv_movement.conv2d.0.bias': tensor([ 0.0996, -0.0291, 0.0981, -0.0349]), 'conv_movement.conv2d.3.weight': tensor([[[[-0.1463, 0.1088, 0.0398],
[ 0.1528, -0.1527, -0.1387],
[-0.1613, 0.0689, 0.1061]],
[[-0.0124, -0.1051, -0.1141],
[-0.0235, -0.0636, -0.1391],
[ 0.1181, 0.0564, -0.0184]],
[[ 0.0311, -0.0529, 0.0009],
[-0.0357, 0.0722, -0.1463],
[ 0.0988, -0.0810, -0.0481]],
[[ 0.0838, 0.0940, 0.0238],
[ 0.0038, 0.1074, 0.0380],
[-0.1141, 0.1000, 0.0315]]],
[[[-0.0082, -0.0285, -0.0440],
[ 0.1579, 0.0206, 0.1150],
[-0.1062, -0.0059, 0.0693]],
[[-0.0345, -0.0447, 0.1101],
[ 0.0723, 0.0555, 0.1545],
[ 0.1729, -0.0309, 0.1388]],
[[ 0.0303, 0.0901, 0.0467],
[-0.0583, 0.1481, -0.1363],
[-0.0411, 0.1343, -0.1147]],
[[ 0.0399, -0.1580, 0.0886],
[-0.1364, 0.0623, 0.1389],
[-0.0348, -0.0724, -0.0166]]],
[[[ 0.1291, 0.0795, 0.1066],
[-0.0995, 0.1558, 0.0935],
[ 0.0022, 0.1491, 0.0989]],
[[ 0.1930, 0.0390, 0.1914],
[-0.0081, -0.0318, -0.0125],
[-0.1445, -0.0754, -0.1263]],
[[-0.0991, 0.0775, 0.0836],
[ 0.1105, -0.1096, 0.0054],
[ 0.0021, -0.0706, -0.0750]],
[[-0.0861, 0.0978, -0.1280],
[-0.0053, -0.1482, -0.1093],
[-0.0514, -0.0211, 0.0459]]],
[[[ 0.1470, 0.1572, -0.0227],
[ 0.0372, 0.0211, -0.0680],
[ 0.0969, -0.0412, 0.0442]],
[[-0.0756, 0.0531, 0.0254],
[-0.0584, -0.0716, 0.0875],
[ 0.0440, -0.0200, -0.0683]],
[[ 0.0399, 0.0820, 0.1601],
[ 0.1250, -0.0914, -0.0848],
[-0.0561, 0.1629, -0.0629]],
[[-0.0522, -0.1008, 0.0371],
[-0.0392, 0.0188, -0.0007],
[-0.1071, 0.1307, 0.0849]]]]), 'conv_movement.conv2d.3.bias': tensor([ 0.1174, 0.1325, -0.0597, 0.0032]), 'fcn_movement_all.ffn.0.weight': tensor([[ 0.0149, 0.0115, -0.0123, ..., -0.0095, 0.0132, 0.0007],
[ 0.0091, 0.0164, -0.0049, ..., -0.0165, 0.0147, 0.0155],
[-0.0063, -0.0067, 0.0041, ..., 0.0090, 0.0075, 0.0150],
...,
[ 0.0267, -0.0020, -0.0049, ..., -0.0070, -0.0162, -0.0134],
[ 0.0205, 0.0275, 0.0115, ..., -0.0023, -0.0142, 0.0044],
[ 0.0150, -0.0153, -0.0090, ..., -0.0128, -0.0138, 0.0021]]), 'fcn_movement_all.ffn.0.bias': tensor([ 0.0010, 0.0112, 0.0118, 0.0066, 0.0121, -0.0040, -0.0113, 0.0066,
-0.0163, 0.0063, -0.0067, -0.0077, -0.0118, -0.0119, -0.0193, -0.0177,
-0.0146, -0.0141, 0.0054, 0.0066, -0.0111, 0.0121, 0.0032, 0.0065,
-0.0162, 0.0185, 0.0032, 0.0041, -0.0189, 0.0127, 0.0192, 0.0076,
-0.0175, -0.0055, 0.0114, -0.0142, 0.0183, -0.0072, 0.0073, -0.0153,
0.0215, -0.0184, 0.0290, 0.0061, -0.0139, -0.0170, 0.0050, 0.0048,
0.0160, -0.0065, 0.0050, 0.0158, 0.0187, -0.0140, 0.0059, 0.0042,
-0.0201, -0.0027, -0.0152, -0.0046, -0.0216, 0.0013, -0.0002, 0.0120,
0.0098, -0.0157, 0.0080, 0.0150, 0.0204, 0.0148, 0.0192, 0.0024,
-0.0154, 0.0292, -0.0186, -0.0068, 0.0006, -0.0205, -0.0090, -0.0018,
0.0098, -0.0185, -0.0153, -0.0144, 0.0185, -0.0187, -0.0137, 0.0001,
0.0073, -0.0167, -0.0088, -0.0189, 0.0225, 0.0041, -0.0197, 0.0069,
-0.0092, -0.0039, 0.0025, -0.0080, 0.0109, -0.0122, 0.0184, 0.0101,
-0.0133, -0.0025, -0.0180, 0.0058, 0.0011, 0.0031, -0.0122, -0.0075,
-0.0161, -0.0041, -0.0169, -0.0144, 0.0135, -0.0162, -0.0053, -0.0144,
0.0002, -0.0015, -0.0130, 0.0161, -0.0028, 0.0110, -0.0080, 0.0143]), 'fcn_movement_all.ffn.3.weight': tensor([[ 0.0709, -0.0686, -0.0684, ..., -0.0685, 0.0199, 0.0439],
[-0.0714, 0.0408, -0.0841, ..., 0.0963, 0.0102, 0.0181],
[-0.0637, -0.0808, -0.0018, ..., 0.0488, 0.0009, 0.0614],
...,
[ 0.0009, -0.0664, 0.0354, ..., 0.0756, 0.0349, 0.0348],
[-0.0039, 0.0364, -0.0362, ..., -0.0324, 0.0324, 0.0512],
[ 0.0114, -0.0226, -0.0481, ..., -0.0066, 0.0663, 0.0315]]), 'fcn_movement_all.ffn.3.bias': tensor([-0.0264, -0.0202, 0.0647, 0.0137, 0.0166, 0.0652, -0.0585, -0.0630,
-0.0566, -0.0471, -0.0063, 0.0347, 0.0156, 0.0359, -0.0207, -0.0591,
-0.0475, -0.0308, 0.0484, -0.0285, -0.0575, -0.0350, -0.0688, 0.0413,
0.0018, 0.0607, 0.0023, 0.0786, 0.0003, 0.0141, -0.0142, 0.0736,
0.0203, 0.0400, 0.0900, -0.0628, 0.0696, 0.0852, 0.0162, -0.0843,
-0.0590, 0.0281, -0.0676, 0.0101, -0.0176, -0.0396, -0.0493, -0.0149,
-0.0029, 0.0537, -0.0004, -0.0272, 0.0740, 0.0968, 0.0001, -0.0306,
-0.0528, -0.0810, -0.0113, 0.0070, 0.0093, 0.0543, 0.0875, 0.0067,
-0.0231, 0.0589, 0.0441, -0.0875, 0.0818, -0.0296, -0.0556, 0.0841,
-0.0057, -0.0231, 0.0310, 0.0739, 0.0695, -0.0546, 0.0269, -0.0503,
0.0805, -0.0120, -0.0219, 0.0358, 0.0411, -0.0386, -0.0403, -0.0042,
0.0740, -0.0646, 0.0367, 0.0803, 0.0216, 0.1001, 0.0310, 0.0483,
0.0196, 0.0331, 0.0645, -0.0319, -0.0414, -0.0581, 0.0753, -0.0247,
-0.0107, 0.0427, -0.0487, 0.0406, 0.0550, -0.0184, -0.0585, 0.0391,
0.0837, -0.0357, 0.0112, 0.0722, -0.0266, 0.0068, 0.0081, -0.0312,
0.0050, 0.0347, 0.0124, 0.0922, 0.0276, -0.0404, -0.0490, -0.0579]), 'fcn_movement_all.ffn.6.weight': tensor([[-0.0135, 0.0989, 0.0645, ..., 0.0323, 0.0300, 0.0584],
[ 0.0595, -0.0107, -0.0370, ..., 0.0015, 0.0687, 0.0916],
[ 0.0335, -0.0315, -0.0307, ..., 0.0463, 0.0601, 0.0458],
...,
[ 0.0619, -0.0652, -0.0701, ..., 0.0911, -0.0090, -0.0771],
[-0.0357, -0.0774, -0.0543, ..., -0.0689, -0.0993, -0.0332],
[-0.0370, -0.0066, 0.1055, ..., -0.0095, 0.0717, 0.0401]]), 'fcn_movement_all.ffn.6.bias': tensor([ 0.0203, -0.0187, 0.0870, -0.0324, -0.0639, 0.0060, 0.0291, 0.0544,
-0.0263, -0.0153, -0.0598, 0.0104])})
C:\Users\for329\AppData\Local\Temp\ipykernel_24996\2620175939.py:6: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
model.load_state_dict(torch.load(f'model_checkpoints/checkpoint_CNN_global_buffalo{buffalo_id}_TAmix_bearing-rev_adj_hab-covs_grid-only_{date}.pt', map_location=torch.device('cpu')))
Out[21]:
ConvJointModel(
(scalar_grid_output): Scalar_to_Grid_Block()
(conv_habitat): Conv2d_block_spatial(
(conv2d): Sequential(
(0): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU()
(2): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU()
(4): Conv2d(4, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
)
(conv_movement): Conv2d_block_toFC(
(conv2d): Sequential(
(0): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU()
(2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(3): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(4): ReLU()
(5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(6): Flatten(start_dim=1, end_dim=-1)
)
)
(fcn_movement_all): FCN_block_all_movement(
(ffn): Sequential(
(0): Linear(in_features=2500, out_features=128, bias=True)
(1): Dropout(p=0.1, inplace=False)
(2): ReLU()
(3): Linear(in_features=128, out_features=128, bias=True)
(4): Dropout(p=0.1, inplace=False)
(5): ReLU()
(6): Linear(in_features=128, out_features=12, bias=True)
)
)
(movement_grid_output): Params_to_Grid_Block()
)
Setup simulation parameters¶
Testing the subset function
In [22]:
# Create a mapping from day of the year to month index
def day_to_month_index(day_of_year):
# Calculate the year and the day within that year
base_date = datetime(2018, 1, 1)
date = base_date + timedelta(days=int(day_of_year) - 1)
year_diff = date.year - base_date.year
month_index = (date.month - 1) + (year_diff * 12) # month index (0-based, accounting for year change)
return month_index
yday = 60
month_index = day_to_month_index(yday)
print(month_index)
2
In [23]:
window_size = 101
# starting location of buffalo 2005
x = 41969.310875
y = -1.435671e+06
yday = 280
month_index = day_to_month_index(yday)
ndvi_subset, origin_x, origin_y = subset_raster_with_padding_torch(ndvi_global_norm[month_index,:,:], x, y, window_size, raster_transform)
canopy_subset, origin_x, origin_y = subset_raster_with_padding_torch(canopy_global_norm, x, y, window_size, raster_transform)
herby_subset, origin_x, origin_y = subset_raster_with_padding_torch(herby_global_norm, x, y, window_size, raster_transform)
slope_subset, origin_x, origin_y = subset_raster_with_padding_torch(slope_global_norm, x, y, window_size, raster_transform)
# Plot the subset
fig, axs = plt.subplots(2, 2, figsize=(10, 10))
axs[0, 0].imshow(ndvi_subset.numpy(), cmap='viridis')
axs[0, 0].set_title('NDVI')
# axs[0, 0].axis('off')
axs[0, 1].imshow(canopy_subset.numpy(), cmap='viridis')
axs[0, 1].set_title('Canopy Cover')
# axs[0, 1].axis('off')
axs[1, 0].imshow(herby_subset.numpy(), cmap='viridis')
axs[1, 0].set_title('Herbaceous Vegetation')
# axs[1, 0].axis('off')
axs[1, 1].imshow(slope_subset.numpy(), cmap='viridis')
axs[1, 1].set_title('Slope')
# axs[1, 1].axis('off')
Out[23]:
Text(0.5, 1.0, 'Slope')
Dealing with NaN values¶
In [24]:
layer = ndvi_subset
# layer = ndvi_global_norm
layer = slope_subset
# are there nans in the array?
# print(torch.isnan(layer).any())
# Create mask where original array has values of -1, which is only at the edges as everything else is normalised between 0 and 1
mask = np.where(layer == -1, -np.inf, 1)
plt.imshow(mask)
plt.colorbar()
plt.show()
# # Replace NaNs in the original array with 0
# layer_filled = np.nan_to_num(layer, nan=0.)
layer_updated = layer * mask
plt.imshow(layer_updated)
plt.colorbar()
plt.show()
Stack the subset layers¶
In [25]:
# Stack the channels along a new axis; here, 1 is commonly used for channel axis in PyTorch
subset_stack = torch.stack([ndvi_subset, canopy_subset, herby_subset, slope_subset], dim=0)
subset_stack
x1 = subset_stack
x1 = x1.unsqueeze(0)
print(x1.shape)
# print(x1)
torch.Size([1, 4, 101, 101])
Additional data¶
In [26]:
n_steps = 1000
def repeat_sequence(sequence, length_out):
return np.resize(sequence, length_out)
# hour of the day (hour) sequence
hour_t2 = np.resize(range(24), n_steps)
# print(hour_t2)
# convert to sine and cosine
hour_t2_sin = np.sin(2*np.pi*hour_t2/24)
hour_t2_cos = np.cos(2*np.pi*hour_t2/24)
# day of the year (yday) sequence
yday_sequence = np.repeat(range(yday, 365), 24)
yday_t2 = np.resize(yday_sequence, n_steps)
# print(yday_t2)
# convert to sine and cosine
yday_t2_sin = np.sin(2*np.pi*yday_t2/365)
yday_t2_cos = np.cos(2*np.pi*yday_t2/365)
# bearing vector
bearing = np.repeat(0, n_steps)
# Convert lists to PyTorch tensors
hour_t2_tensor = torch.tensor(hour_t2).float()
hour_t2_sin_tensor = torch.tensor(hour_t2_sin).float()
hour_t2_cos_tensor = torch.tensor(hour_t2_cos).float()
yday_t2_tensor = torch.tensor(yday_t2).float()
yday_t2_sin_tensor = torch.tensor(yday_t2_sin).float()
yday_t2_cos_tensor = torch.tensor(yday_t2_cos).float()
bearing = torch.tensor(bearing).float()
# Stack tensors column-wise
x2_full = torch.stack((hour_t2_sin_tensor, hour_t2_cos_tensor, yday_t2_sin_tensor, yday_t2_cos_tensor), dim=1)
# print(x2)
print(x2_full.shape)
# print(x2)
print(x2_full[59,:])
torch.Size([1000, 4]) tensor([ 0.2588, -0.9659, -0.9899, 0.1415])
Functions to recover the hour and yday from the sin and cosine terms¶
In [27]:
def recover_hour(sin_term, cos_term):
# Calculate the angle theta
theta = np.arctan2(sin_term, cos_term)
# Calculate hour_t2
hour = (12 * theta) / np.pi % 24
return hour
def recover_yday(sin_term, cos_term):
# Calculate the angle theta
theta = np.arctan2(sin_term, cos_term)
# Calculate hour_t2
yday = (365 * theta) / (2 * np.pi) % 365
return yday
Test the model on the subsetted layers¶
In [29]:
step = 20
test = model((x1, x2_full[step,:].unsqueeze(0), bearing[step].unsqueeze(0).unsqueeze(0)))
print(x2_full[step,:])
print(bearing[step].unsqueeze(0).unsqueeze(0))
x2_step = x2_full[step,:]
# Pull out the scalars
hour_t2_sin = x2_step.detach().numpy()[0]
hour_t2_cos = x2_step.detach().numpy()[1]
yday_t2_sin = x2_step.detach().numpy()[2]
yday_t2_cos = x2_step.detach().numpy()[3]
# recover the hour value for the step
hour_t2 = recover_hour(hour_t2_sin, hour_t2_cos)
print(hour_t2)
# recover the yday value for the step
yday_t2 = recover_yday(yday_t2_sin, yday_t2_cos)
print(yday_t2)
# plot the results of the habitat density as an image
hab_density = test.detach().numpy()[0,:,:,0]
plt.imshow(hab_density)
plt.colorbar()
plt.show()
# Create the mask for x and y coordinates
x_mask = np.ones_like(hab_density)
y_mask = np.ones_like(hab_density)
# mask out cells on the edges that affect the colour scale
x_mask[:, :3] = -np.inf
x_mask[:, 98:] = -np.inf
y_mask[:3, :] = -np.inf
y_mask[98:, :] = -np.inf
hab_density_mask = hab_density * x_mask * y_mask
plt.imshow(hab_density_mask)
plt.colorbar()
plt.show()
plt.imshow(np.exp(hab_density))
plt.colorbar()
plt.show()
# plot the results of the movement density as an image
plt.imshow(test.detach().numpy()[0,:,:,1])
plt.colorbar()
plt.show()
# plot the results of the exp movement density as an image
plt.imshow(np.exp(test.detach().numpy()[0,:,:,1]))
plt.colorbar()
plt.show()
test_cat = (test[:, :, :, 0] + test[:, :, :, 1])
test_cat = test_cat.squeeze()
print(test_cat.shape)
# # Create mask where original array has values of -1, which is only at the edges as everything else is normalised between 0 and 1
layer = ndvi_subset
mask = torch.where(layer == -1, torch.tensor(float('nan')), 1)
plt.imshow(mask)
plt.colorbar()
plt.show()
test_cat = test_cat * mask
test_cat = test_cat.detach().numpy()[:,:]
test_cat = test_cat * x_mask * y_mask
plt.imshow(test_cat)
plt.colorbar()
plt.show()
test_cat_exp = np.exp(test_cat)
test_cat_exp = test_cat_exp * x_mask * y_mask
plt.imshow(test_cat_exp)
plt.colorbar()
plt.show()
tensor([-0.8660, 0.5000, -0.9942, 0.1074]) tensor([[0.]]) 19.99999988868986 280.00000313675633
torch.Size([101, 101])
C:\Users\for329\AppData\Local\Temp\ipykernel_24996\4292268014.py:81: RuntimeWarning: invalid value encountered in multiply test_cat_exp = test_cat_exp * x_mask * y_mask
Sample from the probability surface¶
In [30]:
test_cat = (test[:, :, :, 0] + test[:, :, :, 1])
test_cat = test_cat.squeeze()
# sample from the array values
test_cat_exp = torch.exp(test_cat)
# replace NaNs with 0
test_cat_exp = torch.nan_to_num(test_cat_exp, nan=0.)
# normalise the array
test_cat_exp = test_cat_exp/torch.sum(test_cat_exp)
# print(test_cat_exp)
print(torch.sum(test_cat_exp))
# Flatten the probability surface
flat_prob_surface = test_cat_exp.flatten().detach().numpy()
print(flat_prob_surface)
# Generate the corresponding indices for the flattened array
indices = np.arange(flat_prob_surface.size)
print(indices)
# Sample from the flattened probability surface
sampled_index = np.random.choice(indices, p=flat_prob_surface)
print(sampled_index)
print(flat_prob_surface[sampled_index])
# Convert the sampled index back to 2D coordinates
sampled_coordinates = np.unravel_index(sampled_index, test_cat_exp.shape)
print("Sampled coordinates:", sampled_coordinates)
step_prob = np.exp(test_cat.detach().numpy()[:,:])
# Set the pixel of the next step, which is at (x, y) to 0
step_prob[sampled_coordinates] = -0.0000001
# step_prob[10, 30] = 1.0 # the y axis comes FIRST
plt.imshow(step_prob)
plt.colorbar()
plt.show()
tensor(1.0000, dtype=torch.float64, grad_fn=<SumBackward0>) [4.13488355e-13 1.14368620e-11 8.88027499e-11 ... 9.29886293e-12 3.65546899e-12 3.44399341e-13] [ 0 1 2 ... 10198 10199 10200] 4314 0.00028932580419744995 Sampled coordinates: (42, 72)
Return sampled point to geographic coordinates¶
In [31]:
# original locations
print(x ,y)
print(sampled_coordinates)
print(px, py)
# row_start = py - half_window
# row_stop = py + half_window + 1
# col_start = px - half_window
# col_stop = px + half_window + 1
print(row_start, col_start)
# new_px = origin_x + sampled_coordinates[0]
# new_py = origin_y + sampled_coordinates[1]
# THE Y COORDINATE COMES FIRST
new_px = origin_x + sampled_coordinates[1]
new_py = origin_y + sampled_coordinates[0]
print(new_px)
print(new_py)
# Convert geographic coordinates to pixel coordinates
new_x, new_y = raster_transform * (new_px, new_py)
print(new_x, new_y)
41969.310875 -1435671.0 (42, 72) 1480 1160 1590 2310 1701 1179 42525.0 -1435475.0
Full trajectory function¶
In [32]:
def simulate_trajectory(global_raster_tensors,
scalars_to_grid,
# additional_data_tensor,
bearing,
window_size,
x_loc,
y_loc,
global_raster_transform):
results = [subset_raster_with_padding_torch(raster_tensor, x=x_loc, y=y_loc, window_size=window_size, transform=global_raster_transform) for raster_tensor in global_raster_tensors]
# Unpacking the results
subset_rasters_tensors, origin_xs, origin_ys = zip(*results)
# print(subset_rasters_tensors)
# Stack the processed tensors along a new dimension (e.g., dimension 0)
x1 = torch.stack(subset_rasters_tensors, dim=0)
x1 = x1.unsqueeze(0)
# print(x1.shape)
single_layer = x1[0, 0, :, :]
# plt.imshow(single_layer.detach().numpy()[:,:])
# plt.colorbar()
# plt.show()
# create masking layer to remove outside of the extent
mask = torch.where(single_layer == -1, torch.tensor(float('nan')), 1)
# test_cat_exp = torch.exp(test_cat)
# extract NaNs to pad and make masking layer
x2 = scalars_to_grid
# x3 = additional_data_tensor
x3 = bearing
test = model((x1, x2, x3))
# print(test.shape)
hab_log_prob = test[:, :, :, 0]
move_log_prob = test[:, :, :, 1]
step_log_prob = (hab_log_prob + move_log_prob)
step_log_prob = step_log_prob * mask
hab_log_prob = hab_log_prob.squeeze()
move_log_prob = move_log_prob.squeeze()
step_log_prob = step_log_prob.squeeze()
# sample from the array values
step_prob = torch.exp(step_log_prob)
step_prob = torch.nan_to_num(step_prob, nan=0.)
step_prob_norm = step_prob/torch.sum(step_prob)
# print(test_cat_exp)
# print(torch.sum(test_cat_exp))
# plt.imshow(test_cat_exp.detach().numpy()[:,:])
# plt.colorbar()
# plt.show()
# Flatten the probability surface
flat_step_prob_norm = step_prob_norm.flatten().detach().numpy()
# print(flat_prob_surface)
# Generate the corresponding indices for the flattened array
indices = np.arange(flat_step_prob_norm.size)
# Sample from the flattened probability surface
sampled_index = np.random.choice(indices, p=flat_step_prob_norm)
# Convert the sampled index back to 2D coordinates
sampled_coordinates = np.unravel_index(sampled_index, step_prob_norm.shape)
# THE Y COORDINATE COMES FIRST in the sampled coordinates
new_px = origin_xs[0] + sampled_coordinates[1]
new_py = origin_ys[0] + sampled_coordinates[0]
# Convert geographic coordinates to pixel coordinates
new_x, new_y = raster_transform * (new_px, new_py)
# Place the sampled location at a random point within the cell (rather than the centre)
# new_x = new_x + np.random.uniform(-12.5, 12.5)
# new_y = new_y + np.random.uniform(-12.5, 12.5)
# instead of the uniform, sample from a normal distribution with mean 0 and sd 6.5,
# which are the parameters where the cell contains ~ 95% of density
# if it's outside the bounds of the cell, resample
while True:
jitter_x = np.random.normal(0, 6.5)
if -12.5 <= jitter_x <= 12.5:
break
# Sample jitter for new_y and ensure it is within bounds
while True:
jitter_y = np.random.normal(0, 6.5)
if -12.5 <= jitter_y <= 12.5:
break
# Add the valid jitter to new_x and new_y
new_x = new_x + jitter_x
new_y = new_y + jitter_y
# print(new_x, new_y)
# return new_x, new_y, step_prob
return new_x, new_y, hab_log_prob, move_log_prob, step_log_prob, sampled_coordinates[1], sampled_coordinates[0]
Call the function¶
In [33]:
global_raster_list = [ndvi_global_norm[which_ndvi,:,:], canopy_global_norm, herby_global_norm, slope_global_norm]
x2 = x2_full[3,:].unsqueeze(0)
# x3 = x2
bearing_step = bearing[3].unsqueeze(0).unsqueeze(0)
print(x2)
print(bearing.shape)
simulate_trajectory(global_raster_tensors=global_raster_list,
scalars_to_grid=x2,
# additional_data_tensor=x3,
bearing=bearing_step,
window_size=101,
x_loc=41969,
y_loc=-1.435671e+06,
global_raster_transform=raster_transform)
tensor([[ 0.7071, 0.7071, -0.9942, 0.1074]]) torch.Size([1000])
Out[33]:
(42069.500571025696,
-1435974.6646447105,
tensor([[-26.0534, -22.3366, -20.1514, ..., -20.2363, -22.0209, -25.8044],
[-23.1791, -17.4641, -13.9362, ..., -13.8954, -16.8116, -22.9113],
[-22.3855, -16.1095, -11.9769, ..., -11.6809, -14.9195, -21.8527],
...,
[-22.0862, -15.9353, -12.0901, ..., -12.0973, -15.2174, -21.9754],
[-24.3445, -19.8127, -16.7693, ..., -16.6318, -18.7989, -23.8923],
[-27.2956, -25.0496, -23.4760, ..., -23.3530, -24.4199, -27.0910]],
dtype=torch.float64, grad_fn=<SqueezeBackward0>),
tensor([[-16.5258, -16.4343, -16.3436, ..., -15.9515, -16.0365, -16.1225],
[-16.4377, -16.3451, -16.2534, ..., -15.8557, -15.9418, -16.0287],
[-16.3503, -16.2568, -16.1640, ..., -15.7606, -15.8477, -15.9357],
...,
[-16.4157, -16.3229, -16.2308, ..., -15.8676, -15.9544, -16.0421],
[-16.5038, -16.4119, -16.3209, ..., -15.9630, -16.0488, -16.1354],
[-16.5926, -16.5018, -16.4118, ..., -16.0590, -16.1438, -16.2295]],
dtype=torch.float64, grad_fn=<SqueezeBackward0>),
tensor([[-42.5792, -38.7709, -36.4950, ..., -36.1878, -38.0575, -41.9269],
[-39.6168, -33.8093, -30.1896, ..., -29.7511, -32.7534, -38.9400],
[-38.7358, -32.3662, -28.1409, ..., -27.4415, -30.7672, -37.7884],
...,
[-38.5019, -32.2582, -28.3209, ..., -27.9650, -31.1718, -38.0175],
[-40.8482, -36.2246, -33.0901, ..., -32.5947, -34.8476, -40.0277],
[-43.8883, -41.5514, -39.8877, ..., -39.4120, -40.5637, -43.3204]],
dtype=torch.float64, grad_fn=<SqueezeBackward0>),
54,
62)
Generate trajectory¶
Setup parameters¶
In [34]:
# Get today's date for saving figures
today_date = datetime.today().strftime('%Y-%m-%d')
# Setup the simulation parameters
n_steps = 3000
starting_yday = 206
# starting location of buffalo 2005
start_x = 41969.310875
start_y = -1.435671e+06
global_raster_list = [ndvi_global_norm, canopy_global_norm, herby_global_norm, slope_global_norm]
print(global_raster_list[0].shape)
window_size = 101
global_raster_transform = raster_transform
torch.Size([24, 2280, 2400])
Create simulation inputs from the parameters¶
In [35]:
x = np.repeat(0., n_steps)
y = np.repeat(0., n_steps)
# print(x)
x[0], y[0] = start_x, start_y
# print(x, y)
# Create sequence of steps
step = range(1, n_steps)
# print(step)
# hour of the day (hour) sequence
hour_t2 = np.resize(range(24), n_steps)
# print(hour_t2)
# convert to sine and cosine
hour_t2_sin = np.sin(2*np.pi*hour_t2/24)
hour_t2_cos = np.cos(2*np.pi*hour_t2/24)
# Create the day of the year sequences
yday_t2 = np.repeat(range(starting_yday, starting_yday + 365), 24)
yday_t2 = np.resize(yday_t2, n_steps)
# print(yday_t2)
# convert to sine and cosine
yday_t2_sin = np.sin(2*np.pi*yday_t2/365)
yday_t2_cos = np.cos(2*np.pi*yday_t2/365)
# bearing vector
bearing = np.repeat(0., n_steps)
# Convert lists to PyTorch tensors
hour_t2_tensor = torch.tensor(hour_t2).float()
hour_t2_sin_tensor = torch.tensor(hour_t2_sin).float()
hour_t2_cos_tensor = torch.tensor(hour_t2_cos).float()
yday_t2_tensor = torch.tensor(yday_t2).float()
yday_t2_sin_tensor = torch.tensor(yday_t2_sin).float()
yday_t2_cos_tensor = torch.tensor(yday_t2_cos).float()
bearing_tensor = torch.tensor(bearing).float()
# Stack tensors column-wise to create a tensor of shape [n_steps, 4]
x2_full = torch.stack((hour_t2_sin_tensor, hour_t2_cos_tensor, yday_t2_sin_tensor, yday_t2_cos_tensor), dim=1)
# x3_full = x2_full
# print(head(x2_full))
# List to hold filenames of saved images
filenames_hab = []
filenames_move = []
filenames_step = []
Select the appropriate NDVI layer from the yday¶
In [36]:
# Initialize variables to cache the previous yday and month index
previous_yday = None
month_index = None
day_to_month_index(500)
yday_sequence_1 = np.repeat(range(starting_yday, starting_yday + 365), 24)
print(yday_sequence_1)
print(len(yday_sequence_1))
[206 206 206 ... 570 570 570] 8760
Trajectory loop¶
In [37]:
for i in range(1, n_steps):
x_loc = x[i-1]
y_loc = y[i-1]
# calculate the bearing from the previous location
if i > 1:
bearing_rad = np.arctan2(y[i-1] - y[i-2], x[i-1] - x[i-2])
else:
bearing_rad = np.random.uniform(-np.pi, np.pi)
# Store the bearing in the vector
bearing[i-1] = bearing_rad
# print("Bearing[i-1]", bearing[i-1])
bearing_tensor = torch.tensor(bearing[i-1]).unsqueeze(0).unsqueeze(0)
x2 = x2_full[i-1,:].unsqueeze(dim=0)
# print(x2)
# Determine the month index based on the day of the year
day_of_year = yday_t2[i-1]
if day_of_year != previous_yday:
month_index = day_to_month_index(day_of_year)
previous_yday = day_of_year
# print(f'Day of the year: {day_of_year}')
month_index = day_to_month_index(day_of_year)
# print(f'Month index: {month_index}')
global_raster_list = [ndvi_global_norm[month_index,:,:], canopy_global_norm, herby_global_norm, slope_global_norm]
sim_outputs = simulate_trajectory(global_raster_tensors=global_raster_list,
scalars_to_grid=x2,
# additional_data_tensor=x2,
bearing=bearing_tensor,
window_size=101,
x_loc=x_loc,
y_loc=y_loc,
global_raster_transform=global_raster_transform)
new_x, new_y, hab_log_prob, move_log_prob, step_log_prob, px, py = sim_outputs
# print(new_x, new_y)
# print(px, py)
new_bearing = np.arctan2(new_y - y_loc, new_x - x_loc)
# print(new_bearing)
x[i] = new_x
y[i] = new_y
# ### PLOTTING
# # Create the mask for x and y coordinates
# x_mask = np.ones_like(hab_density)
# y_mask = np.ones_like(hab_density)
# # mask out cells on the edges that affect the colour scale
# x_mask[:, :3] = -np.inf
# x_mask[:, 98:] = -np.inf
# y_mask[:3, :] = -np.inf
# y_mask[98:, :] = -np.inf
# ## Habitat probability surface
# # normalise the raster for plotting
# hab_log_prob_norm = hab_log_prob/-torch.sum(hab_log_prob)
# # convert to numpy array
# hab_log_prob_norm = hab_log_prob_norm.detach().numpy()[:,:]
# # Create the mask for x and y coordinates
# hab_log_prob_norm = hab_log_prob_norm * x_mask * y_mask
# # Save the figure
# filename_hab = f"outputs/dl_prob_maps/ta_mix/id{buffalo_id}_hab_log_prob_norm_{today_date}_{i}.png"
# plt.figure() # Create a new figure
# plt.imshow(hab_log_prob_norm)
# plt.colorbar()
# plt.draw() # Ensure the plot is rendered
# plt.savefig(filename_hab, dpi=300)
# # plt.show()
# plt.close() # Close the figure to free memory
# # Add filename to the list
# filenames_hab.append(filename_hab)
# ### Movement probability surface
# # normalise the raster for plotting
# move_log_prob_norm = move_log_prob/-torch.sum(move_log_prob)
# # convert to numpy array
# move_log_prob_norm = move_log_prob_norm.detach().numpy()[:,:]
# # Save the figure
# filename_move = f"outputs/dl_prob_maps/id{buffalo_id}_move_log_prob_norm_{today_date}_{i}.png"
# plt.figure() # Create a new figure
# plt.imshow(move_log_prob_norm)
# plt.colorbar()
# plt.draw() # Ensure the plot is rendered
# plt.savefig(filename_move, dpi=300)
# # plt.show()
# plt.close() # Close the figure to free memory
# # Add filename to the list
# filenames_move.append(filename_move)
# ### Step selection probability surface
# # normalise the raster for plotting
# log_prob_surface_norm = step_log_prob/-torch.sum(step_log_prob)
# # convert to numpy array
# log_prob_surface_norm = log_prob_surface_norm.detach().numpy()[:,:]
# # Set the pixel of the next step, which is at (x, y) to 0
# log_prob_surface_norm[py, px] = -0.0001
# # Create the mask for x and y coordinates
# log_prob_surface_norm = log_prob_surface_norm * x_mask * y_mask
# # Save the figure
# filename = f"outputs/dl_prob_maps/ta_mix/id{buffalo_id}_step_log_prob_norm_{today_date}_{i}.png"
# plt.figure() # Create a new figure
# plt.imshow(log_prob_surface_norm)
# plt.colorbar()
# plt.draw() # Ensure the plot is rendered
# plt.savefig(filename, dpi=300)
# # plt.show()
# plt.close() # Close the figure to free memory
# # Add filename to the list
# filenames_step.append(filename)
# ### Step selection probability surface (exp)
# # normalise the raster for plotting
# prob_surface_norm = torch.exp(step_log_prob)/torch.sum(torch.exp(step_log_prob))
# # log_prob_surface_norm = step_log_prob/-torch.sum(step_log_prob)
# # convert to numpy array
# prob_surface_norm = prob_surface_norm.detach().numpy()[:,:]
# # Set the pixel of the next step, which is at (x, y) to 0
# prob_surface_norm[py, px] = 0.0
# # Save the figure
# filename = f"outputs/dl_prob_maps/ta_mix/id{buffalo_id}_prob_surface_{today_date}_{i}.png"
# plt.figure() # Create a new figure
# plt.imshow(prob_surface_norm)
# plt.colorbar()
# plt.draw() # Ensure the plot is rendered
# # plt.savefig(filename, dpi=300)
# plt.show()
# plt.close() # Close the figure to free memory
# # Add filename to the list
# filenames_step.append(filename)
Checking the model outputs¶
In [38]:
# print(bearing)
plt.hist(bearing[x>0], bins=50) #, edgecolor='black'
plt.title('Bearing values')
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.show()
print(bearing.shape)
# Calculate the turning angle (difference between consecutive values)
ta = np.diff(bearing[x>0])
ta = np.insert(ta, 0, 0)
plt.hist(ta, bins=50) #, edgecolor='black'
plt.title('Turning angle values')
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.show()
ta_corr = np.where(ta > np.pi, ta - (2 * np.pi),
np.where(ta < -np.pi, ta + (2 * np.pi), ta))
plt.hist(ta_corr, bins=50) #, edgecolor='black'
plt.title('Turning angle values')
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.show()
print(ta.shape)
step_log_prob_norm = step_log_prob/torch.sum(step_log_prob)
# print(step_log_prob_norm)
plt.imshow(step_log_prob.detach().numpy()[:,:])
plt.colorbar()
plt.show()
# check for NaN values
# print(torch.isnan(step_log_prob_norm).any())
step_prob = torch.exp(step_log_prob)
step_prob_norm = step_prob/torch.sum(step_prob)
# print(torch.sum(step_prob_norm))
plt.imshow(step_prob_norm.detach().numpy()[:,:])
plt.colorbar()
plt.show()
# # Create a histogram
# plt.hist(step_prob_norm.detach().numpy()[:,:], bins=100) #, edgecolor='black'
# # Add title and labels
# plt.title('Histogram Example')
# plt.xlabel('Value')
# plt.ylabel('Frequency')
# Show the plot
plt.show()
# ndvi_global_norm = (ndvi_global_tens - ndvi_min) / (ndvi_max - ndvi_min)
(3000,)
(3000,)
Plot the simulated trajectory¶
In [39]:
# Actual number of locations before there were NaNs
# print(x)
print(x[x>0].shape[0])
# Create a figure and axis with matplotlib
fig, ax = plt.subplots(figsize=(7.5, 7.5))
# Plot the raster
show(ndvi_global[which_ndvi,:,:], transform=raster_transform, ax=ax, cmap='viridis')
# Set the title and labels
ax.set_title('NDVI')
ax.set_xlabel('Easting')
ax.set_ylabel('Northing')
# Plot the simulated trajectory
no_sim_points = np.min([x[x>0].shape[0], y[y<0].shape[0]])
print(no_sim_points)
plt.plot(x[1:no_sim_points], y[1:no_sim_points], color = 'red')
plt.plot(x[1:no_sim_points], y[1:no_sim_points], color = 'red')
# Show the plot
plt.show()
3000 3000
Write the trajectory to a csv¶
Only run this once otherwise it will create duplicates¶
In [40]:
# Combine vectors into a DataFrame
trajectory_df = pd.DataFrame({'x': x[x>0],
'y': y[x>0],
'hour': hour_t2[x>0],
'yday': yday_t2[x>0],
'bearing': bearing[x>0],
'ta': ta})
n_steps_actual = x[x>0].shape[0]
# Save the DataFrame to a CSV file
index = 1
csv_filename = f'outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id{buffalo_id}_{n_steps_actual}steps_{index}_{today_date}.csv'
# Check if the file already exists and find a new name if necessary
while os.path.exists(csv_filename):
csv_filename = f'outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id{buffalo_id}_{n_steps_actual}steps_{index}_{today_date}.csv'
index += 1
print(csv_filename)
trajectory_df.to_csv(csv_filename, index=True)
outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id2005_3000steps_1_2024-09-29.csv
Multiple trajectories in a loop¶
In [42]:
# Setup the simulation parameters
# here we just run a couple simulations to show that it's working
n_trajectories = 5
n_steps = 1000
starting_yday = 206
# global_raster_list = [ndvi_global_norm, canopy_global_norm, herby_global_norm, slope_global_norm]
window_size = 101
global_raster_transform = raster_transform
# Looping over individuals
for j in range(1, n_trajectories+1):
# Setup the vectors used in the prediction function
x = np.repeat(0., n_steps)
y = np.repeat(0., n_steps)
x[0], y[0] = start_x, start_y
# Create sequence of steps
step = range(1, n_steps)
# hour of the day (hour) sequence
hour_t2 = np.resize(range(24), n_steps)
# convert to sine and cosine
hour_t2_sin = np.sin(2*np.pi*hour_t2/24)
hour_t2_cos = np.cos(2*np.pi*hour_t2/24)
# Create the day of the year sequences
yday_t2 = np.repeat(range(starting_yday, starting_yday + 365), 24)
yday_t2 = np.resize(yday_t2, n_steps)
# convert to sine and cosine
yday_t2_sin = np.sin(2*np.pi*yday_t2/365)
yday_t2_cos = np.cos(2*np.pi*yday_t2/365)
# bearing vector
bearing = np.repeat(0., n_steps)
# Convert lists to PyTorch tensors
hour_t2_tensor = torch.tensor(hour_t2).float()
hour_t2_sin_tensor = torch.tensor(hour_t2_sin).float()
hour_t2_cos_tensor = torch.tensor(hour_t2_cos).float()
yday_t2_tensor = torch.tensor(yday_t2).float()
yday_t2_sin_tensor = torch.tensor(yday_t2_sin).float()
yday_t2_cos_tensor = torch.tensor(yday_t2_cos).float()
bearing_tensor = torch.tensor(bearing).float()
# Stack tensors column-wise to create a tensor of shape [n_steps, 4]
x2_full = torch.stack((hour_t2_sin_tensor, hour_t2_cos_tensor, yday_t2_sin_tensor, yday_t2_cos_tensor), dim=1)
# Initialize variables to cache the previous yday and month index
previous_yday = None
month_index = None
# simulation loop
for i in range(1, n_steps):
x_loc = x[i-1]
y_loc = y[i-1]
# calculate the bearing from the previous location
if i > 1:
bearing_rad = np.arctan2(y[i-1] - y[i-2], x[i-1] - x[i-2])
else:
bearing_rad = np.random.uniform(-np.pi, np.pi)
# store bearing in the vector
bearing[i-1] = bearing_rad
bearing_tensor = torch.tensor(bearing[i-1]).unsqueeze(0).unsqueeze(0)
x2 = x2_full[i,:].unsqueeze(dim=0)
# Determine the month index based on the day of the year
day_of_year = yday_t2[i-1]
if day_of_year != previous_yday:
month_index = day_to_month_index(day_of_year)
previous_yday = day_of_year
# print(f'Day of the year: {day_of_year}')
month_index = day_to_month_index(day_of_year)
# print(f'Month index: {month_index}')
global_raster_list = [ndvi_global_norm[month_index,:,:], canopy_global_norm, herby_global_norm, slope_global_norm]
sim_outputs = simulate_trajectory(global_raster_tensors=global_raster_list,
scalars_to_grid=x2,
bearing=bearing_tensor,
window_size=101,
x_loc=x_loc,
y_loc=y_loc,
global_raster_transform=global_raster_transform)
new_x, new_y, hab_log_prob, move_log_prob, step_log_prob, px, py = sim_outputs
x[i] = new_x
y[i] = new_y
# save the data frames individually
# Combine vectors into a DataFrame
trajectory_df = pd.DataFrame({'x': x[x>0],
'y': y[x>0],
'hour': hour_t2[x>0],
'yday': yday_t2[x>0],
'bearing': bearing[x>0]})
n_steps_actual = x[x>0].shape[0]
# Save the DataFrame to a CSV file
index = j
csv_filename = f'outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id{buffalo_id}_{n_steps_actual}steps_{index}_{today_date}.csv'
# Check if the file already exists and find a new name if necessary
while os.path.exists(csv_filename):
csv_filename = f'outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id{buffalo_id}_{n_steps_actual}steps_{index}_{today_date}.csv'
index += 1
print(csv_filename)
trajectory_df.to_csv(csv_filename, index=True)
outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id2005_1000steps_1_2024-09-29.csv outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id2005_1000steps_2_2024-09-29.csv outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id2005_1000steps_3_2024-09-29.csv outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id2005_1000steps_4_2024-09-29.csv outputs/dl_trajectories/ta_mix_monthly_ndvi/dl_trajectory_TAmix_id2005_1000steps_5_2024-09-29.csv